another manual merge.
}
xmem_cache_t *domain_struct_cachep;
+xmem_cache_t *exec_domain_struct_cachep;
+
void __init domain_startofday(void)
{
    /*
     * Create the slab cache backing 'struct domain' allocations.
     * NOTE(review): this call appeared with only four of
     * xmem_cache_create()'s six parameters (cf. the exec_domain cache
     * created just below) -- the name/size arguments were evidently
     * elided; restored here to match the parallel call. Confirm
     * against the upstream file.
     */
    domain_struct_cachep = xmem_cache_create(
        "domain_cache", sizeof(struct domain),
        0, SLAB_HWCACHE_ALIGN, NULL, NULL);
    if ( domain_struct_cachep == NULL )
        panic("No slab cache for domain structs.");
+
+    /* Slab cache backing 'struct exec_domain' allocations. */
+    exec_domain_struct_cachep = xmem_cache_create(
+        "exec_dom_cache", sizeof(struct exec_domain),
+        0, SLAB_HWCACHE_ALIGN, NULL, NULL);
+    if ( exec_domain_struct_cachep == NULL )
+        BUG();
}
struct domain *arch_alloc_domain_struct(void)
xmem_cache_free(domain_struct_cachep, d);
}
+/*
+ * Allocate an exec_domain structure from its dedicated slab cache.
+ * Returns NULL on allocation failure (callers check for this, see
+ * alloc_exec_domain_struct). The returned memory is NOT zeroed here;
+ * the caller memsets it.
+ */
+struct exec_domain *arch_alloc_exec_domain_struct(void)
+{
+    return xmem_cache_alloc(exec_domain_struct_cachep);
+}
+
+/* Return an exec_domain structure to its slab cache. */
+void arch_free_exec_domain_struct(struct exec_domain *ed)
+{
+    xmem_cache_free(exec_domain_struct_cachep, ed);
+}
+
/*
 * Release the domain's per-domain pagetable page back to the Xen heap.
 * NOTE(review): free_xenheap_page() is passed the virtual address as an
 * unsigned long -- presumably the matching allocator returned the same
 * representation; confirm against the allocation site.
 */
void free_perdomain_pt(struct domain *d)
{
    free_xenheap_page((unsigned long)d->mm_perdomain_pt);
}
else
{
- MEM_LOG("Error while installing new baseptr %08lx", ptr);
+ MEM_LOG("Error while installing new baseptr %08lx", pfn);
}
return okay;
LOCK_BIGLOCK(d);
- cleanup_writable_pagetable(d, PTWR_CLEANUP_ACTIVE | PTWR_CLEANUP_INACTIVE);
+ cleanup_writable_pagetable(d);
/*
* If we are resuming after preemption, read how much work we have already
LOCK_BIGLOCK(d);
- cleanup_writable_pagetable(d, PTWR_CLEANUP_ACTIVE | PTWR_CLEANUP_INACTIVE);
+ cleanup_writable_pagetable(d);
/*
* XXX When we make this support 4MB superpages we should also deal with
out:
if ( c != NULL )
xfree(c);
- xmem_cache_free(exec_domain_struct_cachep, d->exec_domain[vcpu]);
+ arch_free_exec_domain_struct(d->exec_domain[vcpu]);
d->exec_domain[vcpu] = NULL;
return rc;
}
/* Per-CPU periodic timer sends an event to the currently-executing domain. */
static struct ac_timer t_timer[NR_CPUS];
-extern xmem_cache_t *domain_struct_cachep;
-extern xmem_cache_t *exec_domain_struct_cachep;
-
void free_domain_struct(struct domain *d)
{
struct exec_domain *ed;
SCHED_OP(free_task, d);
for_each_exec_domain(d, ed)
- xmem_cache_free(exec_domain_struct_cachep, ed);
- xmem_cache_free(domain_struct_cachep, d);
+ arch_free_exec_domain_struct(ed);
+ arch_free_domain_struct(d);
}
struct exec_domain *alloc_exec_domain_struct(struct domain *d,
ASSERT( d->exec_domain[vcpu] == NULL );
- if ( (ed = xmem_cache_alloc(exec_domain_struct_cachep)) == NULL )
+ if ( (ed = arch_alloc_exec_domain_struct()) == NULL )
return NULL;
memset(ed, 0, sizeof(*ed));
out:
d->exec_domain[vcpu] = NULL;
- xmem_cache_free(exec_domain_struct_cachep, ed);
+ arch_free_exec_domain_struct(ed);
return NULL;
}
{
struct domain *d;
- if ( (d = xmem_cache_alloc(domain_struct_cachep)) == NULL )
+ if ( (d = arch_alloc_domain_struct()) == NULL )
return NULL;
memset(d, 0, sizeof(*d));
return d;
out:
- xmem_cache_free(domain_struct_cachep, d);
+ arch_free_domain_struct(d);
return NULL;
}
if ( !is_idle_task(current->domain) )
{
LOCK_BIGLOCK(current->domain);
- cleanup_writable_pagetable(
- prev->domain, PTWR_CLEANUP_ACTIVE | PTWR_CLEANUP_INACTIVE);
+ cleanup_writable_pagetable(prev->domain);
UNLOCK_BIGLOCK(current->domain);
}
/* Arch-specific domain / exec_domain lifecycle hooks. */
extern void arch_free_domain_struct(struct domain *d);
-extern void arch_do_createdomain(struct domain *d);
+/* NOTE(review): added 'extern' for consistency with sibling decls. */
+extern struct exec_domain *arch_alloc_exec_domain_struct(void);
+
+extern void arch_free_exec_domain_struct(struct exec_domain *ed);
+
+extern void arch_do_createdomain(struct exec_domain *ed);
extern int arch_final_setup_guestos(
    struct exec_domain *d, full_execution_context_t *c);